dagloop5 committed
Commit 6df0e07 (verified) · 1 parent: 17f392b

Create app.py

Files changed (1): app.py (+957, -0)
app.py ADDED
@@ -0,0 +1,957 @@
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
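# xformers is installed at startup with check=False, presumably so a failed
# install degrades to the default attention path instead of crashing the Space;
# the patch further down logs whether the xformers op was actually picked up.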

# Clone the LTX-2 repo and install its packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
    subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)

print("Installing ltx-core and ltx-pipelines from the cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps",
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))

import logging
import random
import tempfile
from pathlib import Path
import gc
import hashlib

import torch

torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download
from safetensors.torch import load_file, save_file

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.guiders import MultiModalGuider, MultiModalGuiderParams
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
    multi_modal_guider_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP

# Force-patch the xformers attention op into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod

print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
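# This patch relies on the LTX attention module resolving
# `memory_efficient_attention` as a module-level name at call time, so
# rebinding it routes every transformer attention call through xformers;
# if the import above failed, the module keeps its default implementation.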

logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_NEGATIVE_PROMPT = (
    "worst quality, inconsistent motion, blurry, jittery, distorted, "
    "deformed, artifacts, text, watermark, logo, frame, border, "
    "low resolution, pixelated, unnatural, fake, CGI, cartoon"
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}
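# All presets remain multiples of 64 even after the width // 2, height // 2
# stage-1 halving in the pipeline below; presumably this keeps both stages
# aligned with the video VAE's spatial stride.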


class LTX23DistilledA2VPipeline(DistilledPipeline):
    """DistilledPipeline with optional audio conditioning."""

    def __call__(
        self,
        prompt: str,
        negative_prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        video_guider_params: MultiModalGuiderParams,
        audio_guider_params: MultiModalGuiderParams,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        # Standard path when no audio input is provided.
        print(prompt)  # debug: show the prompt actually used
        if audio_path is None:
            return super().__call__(
                prompt=prompt,
                negative_prompt=negative_prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=frame_rate,
                video_guider_params=video_guider_params,
                audio_guider_params=audio_guider_params,
                images=images,
                tiling_config=tiling_config,
                enhance_prompt=enhance_prompt,
            )

        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        ctx_p, ctx_n = encode_prompts(
            [prompt, negative_prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        v_context_p, a_context_p = ctx_p.video_encoding, ctx_p.audio_encoding
        v_context_n, a_context_n = ctx_n.video_encoding, ctx_n.audio_encoding

        video_duration = num_frames / frame_rate
        decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
        if decoded_audio is None:
            raise ValueError(f"Could not extract audio stream from {audio_path}")

        encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
        audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
        expected_frames = audio_shape.frames
        actual_frames = encoded_audio_latent.shape[2]

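        # Align the encoded audio latent to the frame count AudioLatentShape
        # derives from the video duration: trim any excess along the latent
        # frame axis (dim 2), or zero-pad at the end if the clip runs short.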
        if actual_frames > expected_frames:
            encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
        elif actual_frames < expected_frames:
            pad = torch.zeros(
                encoded_audio_latent.shape[0],
                encoded_audio_latent.shape[1],
                expected_frames - actual_frames,
                encoded_audio_latent.shape[3],
                device=encoded_audio_latent.device,
                dtype=encoded_audio_latent.dtype,
            )
            encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def stage1_denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=multi_modal_guider_denoising_func(
                    video_guider=MultiModalGuider(
                        params=video_guider_params,
                        negative_context=v_context_n,
                    ),
                    audio_guider=MultiModalGuider(
                        params=audio_guider_params,
                        negative_context=a_context_n,
                    ),
                    v_context=v_context_p,
                    a_context=a_context_p,
                    transformer=transformer,
                ),
            )

        def stage2_denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=v_context_p,
                    audio_context=a_context_p,
                    transformer=transformer,
                ),
            )

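        # Two-stage schedule: stage 1 denoises at half resolution with full
        # guider-based (CFG/STG) guidance, then the latent is upsampled 2x and
        # stage 2 runs a guidance-free refinement pass (simple_denoising_func)
        # on top of the upscaled latent.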
        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=stage1_denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=stage2_denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        original_audio = Audio(
            waveform=decoded_audio.waveform.squeeze(0),
            sampling_rate=decoded_audio.sampling_rate,
        )
        return decoded_video, original_audio


# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"

# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

# LoRA cache directory and currently-applied key
LORA_CACHE_DIR = Path("lora_cache")
LORA_CACHE_DIR.mkdir(exist_ok=True)
current_lora_key: str | None = None

PENDING_LORA_KEY: str | None = None
PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
PENDING_LORA_STATUS: str = "No LoRA state prepared yet."

weights_dir = Path("weights")
weights_dir.mkdir(exist_ok=True)
checkpoint_path = hf_hub_download(
    repo_id=LTX_MODEL_REPO,
    filename="ltx-2.3-22b-distilled-1.1.safetensors",
    local_dir=str(weights_dir),
    local_dir_use_symlinks=False,
)
spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.1.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)


# ---- LoRA adapter downloads ----
LORA_REPO = "dagloop5/LoRA"

print("=" * 80)
print("Downloading LoRA adapters from dagloop5/LoRA...")
print("=" * 80)
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # triggers: m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # trigger: Hyperfap
dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # trigger: "[He | She] is having am orgasm." (am or an?)
fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # triggers: cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # trigger: wet dr1pp
demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
transition_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2_takerpov_lora_v1.2.safetensors")  # triggers: takerpov1, taker pov

print(f"Pose LoRA: {pose_lora_path}")
print(f"General LoRA: {general_lora_path}")
print(f"Motion LoRA: {motion_lora_path}")
print(f"Dreamlay LoRA: {dreamlay_lora_path}")
print(f"Mself LoRA: {mself_lora_path}")
print(f"Dramatic LoRA: {dramatic_lora_path}")
print(f"Fluid LoRA: {fluid_lora_path}")
print(f"Liquid LoRA: {liquid_lora_path}")
print(f"Demopose LoRA: {demopose_lora_path}")
print(f"Voice LoRA: {voice_lora_path}")
print(f"Realism LoRA: {realism_lora_path}")
print(f"Transition LoRA: {transition_lora_path}")
# --------------------------------
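# Any adapter whose slider stays at 0.0 is dropped before fusing (see
# prepare_lora_cache below), so downloading all of them up front only costs
# disk space and startup time, not VRAM.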

print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"[Gemma] Root ready: {gemma_root}")

pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=[],
    quantization=QuantizationPolicy.fp8_cast(),
)


def _make_lora_key(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    voice_strength: float,
    realism_strength: float,
    transition_strength: float,
) -> tuple[str, str]:
    rp = round(float(pose_strength), 2)
    rg = round(float(general_strength), 2)
    rm = round(float(motion_strength), 2)
    rd = round(float(dreamlay_strength), 2)
    rs = round(float(mself_strength), 2)
    rr = round(float(dramatic_strength), 2)
    rf = round(float(fluid_strength), 2)
    rl = round(float(liquid_strength), 2)
    ro = round(float(demopose_strength), 2)
    rv = round(float(voice_strength), 2)
    re = round(float(realism_strength), 2)
    rt = round(float(transition_strength), 2)
    key_str = (
        f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|"
        f"{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|"
        f"{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}|"
        f"{voice_lora_path}:{rv}|{realism_lora_path}:{re}|{transition_lora_path}:{rt}"
    )
    key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
    return key, key_str


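# Cache keys hash the full path:strength string (strengths rounded to 2 dp),
# so any change in adapter selection or strength maps to a distinct
# lora_cache/<sha256>.safetensors file, while jitter below 0.01 does not.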
def prepare_lora_cache(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    voice_strength: float,
    realism_strength: float,
    transition_strength: float,
    progress=gr.Progress(track_tqdm=True),
):
    """
    CPU-only step:
      - checks the cache,
      - loads a cached fused transformer state_dict, or
      - builds the fused transformer on CPU and saves it.
    The resulting state_dict is held in memory and applied later.
    """
    global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS

    ledger = pipeline.model_ledger
    key, _ = _make_lora_key(
        pose_strength, general_strength, motion_strength, dreamlay_strength,
        mself_strength, dramatic_strength, fluid_strength, liquid_strength,
        demopose_strength, voice_strength, realism_strength, transition_strength,
    )
    cache_path = LORA_CACHE_DIR / f"{key}.safetensors"

    progress(0.05, desc="Preparing LoRA state")
    if cache_path.exists():
        try:
            progress(0.20, desc="Loading cached fused state")
            state = load_file(str(cache_path))
            PENDING_LORA_KEY = key
            PENDING_LORA_STATE = state
            PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
            return PENDING_LORA_STATUS
        except Exception as e:
            print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")

    entries = [
        (pose_lora_path, round(float(pose_strength), 2)),
        (general_lora_path, round(float(general_strength), 2)),
        (motion_lora_path, round(float(motion_strength), 2)),
        (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
        (mself_lora_path, round(float(mself_strength), 2)),
        (dramatic_lora_path, round(float(dramatic_strength), 2)),
        (fluid_lora_path, round(float(fluid_strength), 2)),
        (liquid_lora_path, round(float(liquid_strength), 2)),
        (demopose_lora_path, round(float(demopose_strength), 2)),
        (voice_lora_path, round(float(voice_strength), 2)),
        (realism_lora_path, round(float(realism_strength), 2)),
        (transition_lora_path, round(float(transition_strength), 2)),
    ]
    loras_for_builder = [
        LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
        for path, strength in entries
        if path is not None and float(strength) != 0.0
    ]

    if not loras_for_builder:
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
        return PENDING_LORA_STATUS

    tmp_ledger = None
    new_transformer_cpu = None
    try:
        progress(0.35, desc="Building fused CPU transformer")
        tmp_ledger = pipeline.model_ledger.__class__(
            dtype=ledger.dtype,
            device=torch.device("cpu"),
            checkpoint_path=str(checkpoint_path),
            spatial_upsampler_path=str(spatial_upsampler_path),
            gemma_root_path=str(gemma_root),
            loras=tuple(loras_for_builder),
            quantization=getattr(ledger, "quantization", None),
        )
        new_transformer_cpu = tmp_ledger.transformer()

        progress(0.70, desc="Extracting fused state_dict")
        state = {
            k: v.detach().cpu().contiguous()
            for k, v in new_transformer_cpu.state_dict().items()
        }
        save_file(state, str(cache_path))

        PENDING_LORA_KEY = key
        PENDING_LORA_STATE = state
        PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
        return PENDING_LORA_STATUS

    except Exception as e:
        import traceback
        print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
        print(traceback.format_exc())
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
        return PENDING_LORA_STATUS

    finally:
        try:
            del new_transformer_cpu
        except Exception:
            pass
        try:
            del tmp_ledger
        except Exception:
            pass
        gc.collect()
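
# Fused state_dicts are written as plain safetensors under lora_cache/, so a
# given strength combination only pays the CPU fusion cost once; later requests
# with the same key short-circuit to load_file above.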


def apply_prepared_lora_state_to_pipeline():
    """
    Fast step: copy the already-prepared CPU state into the live transformer.
    This is the only part that should remain near generation time.
    """
    global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE

    if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
        print("[LoRA] No prepared LoRA state available; skipping.")
        return False

    if current_lora_key == PENDING_LORA_KEY:
        print("[LoRA] Prepared LoRA state already active; skipping.")
        return True

    existing_transformer = _transformer
    with torch.no_grad():
        missing, unexpected = existing_transformer.load_state_dict(PENDING_LORA_STATE, strict=False)
        if missing or unexpected:
            print(f"[LoRA] load_state_dict mismatch: missing={len(missing)}, unexpected={len(unexpected)}")

    current_lora_key = PENDING_LORA_KEY
    print("[LoRA] Prepared LoRA state applied to the pipeline.")
    return True


# Preload all models for ZeroGPU tensor packing.
print("Preloading all models (including Gemma and audio components)...")
ledger = pipeline.model_ledger

# Save the original factory methods so we can rebuild individual components later.
# These are bound callables on the ledger that invoke the builder when called.
_orig_transformer_factory = ledger.transformer
_orig_video_encoder_factory = ledger.video_encoder
_orig_video_decoder_factory = ledger.video_decoder
_orig_audio_encoder_factory = ledger.audio_encoder
_orig_audio_decoder_factory = ledger.audio_decoder
_orig_vocoder_factory = ledger.vocoder
_orig_spatial_upsampler_factory = ledger.spatial_upsampler
_orig_text_encoder_factory = ledger.text_encoder
_orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor

# Call the original factories once to create the cached instances served by default.
_transformer = _orig_transformer_factory()
_video_encoder = _orig_video_encoder_factory()
_video_decoder = _orig_video_decoder_factory()
_audio_encoder = _orig_audio_encoder_factory()
_audio_decoder = _orig_audio_decoder_factory()
_vocoder = _orig_vocoder_factory()
_spatial_upsampler = _orig_spatial_upsampler_factory()
_text_encoder = _orig_text_encoder_factory()
_embeddings_processor = _orig_gemma_embeddings_factory()

# Replace ledger methods with lightweight lambdas that return the cached instances.
# The original factories above remain available for rebuilding components.
ledger.transformer = lambda: _transformer
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor

print("All models preloaded (including Gemma text encoder and audio encoder)!")

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)


def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size  # PIL images expose (width, height)
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]  # numpy arrays are (height, width, ...)
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


def on_highres_toggle(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


def get_gpu_duration(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    negative_prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    video_cfg_scale: float = 1.0,
    video_stg_scale: float = 0.0,
    video_rescale_scale: float = 0.45,
    video_a2v_scale: float = 3.0,
    audio_cfg_scale: float = 1.0,
    audio_stg_scale: float = 0.0,
    audio_rescale_scale: float = 1.0,
    audio_v2a_scale: float = 3.0,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    progress=None,
):
    return int(gpu_duration)
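

# When @spaces.GPU is given a callable for `duration`, ZeroGPU invokes it with
# the same arguments as the decorated function to size the GPU lease; this one
# simply forwards the user-facing "ZeroGPU duration" slider, which is why its
# signature must mirror generate_video's exactly.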
@spaces.GPU(duration=get_gpu_duration)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    negative_prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    video_cfg_scale: float = 1.0,
    video_stg_scale: float = 0.0,
    video_rescale_scale: float = 0.45,
    video_a2v_scale: float = 3.0,
    audio_cfg_scale: float = 1.0,
    audio_stg_scale: float = 0.0,
    audio_rescale_scale: float = 1.0,
    audio_v2a_scale: float = 3.0,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed before the try block so the except branch can still return it.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        frame_rate = DEFAULT_FRAME_RATE
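        # Round the frame count up to the 8k + 1 form the model expects
        # (e.g. 10 s at 24 fps -> 241 frames); presumably the video latent
        # packs 8 pixel frames per temporal step, plus the first frame.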
        num_frames = int(duration * frame_rate) + 1
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

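        # Guider knobs: cfg_scale is classifier-free guidance strength,
        # stg_scale the spatiotemporal-guidance weight (0 disables it, and
        # stg_blocks stays empty), rescale_scale the CFG-rescale factor, and
        # modality_scale the cross-modal weight (audio-to-video here,
        # video-to-audio below), matching the A2V/V2A sliders in the UI.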
        video_guider_params = MultiModalGuiderParams(
            cfg_scale=video_cfg_scale,
            stg_scale=video_stg_scale,
            rescale_scale=video_rescale_scale,
            modality_scale=video_a2v_scale,
            skip_step=0,
            stg_blocks=[],
        )

        audio_guider_params = MultiModalGuiderParams(
            cfg_scale=audio_cfg_scale,
            stg_scale=audio_stg_scale,
            rescale_scale=audio_rescale_scale,
            modality_scale=audio_v2a_scale,
            skip_step=0,
            stg_blocks=[],
        )

        log_memory("before pipeline call")

        apply_prepared_lora_state_to_pipeline()

        video, audio = pipeline(
            prompt=prompt,
            negative_prompt=negative_prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            video_guider_params=video_guider_params,
            audio_guider_params=audio_guider_params,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        output_path = tempfile.mktemp(suffix=".mp4")
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {str(e)}\n{traceback.format_exc()}")
        return None, current_seed


# =============================================================================
# Gradio UI
# =============================================================================

css = """
.fillable {max-width: 1200px !important}
.progress-text {color: black}
"""

with gr.Blocks(
    title="LTX-2.3 Distilled with LoRAs, Negative Prompting, and Advanced Settings",
    theme=gr.themes.Citrus(),
    css=css,
) as demo:
    gr.Markdown("# LTX-2.3 Two-Stage HQ Video Generation")
    gr.Markdown(
        "High-quality text/image-to-video with cached LoRA state + CFG guidance. "
        "[[Model]](https://huggingface.co/Lightricks/LTX-2.3)"
    )

    with gr.Row():
        # LEFT SIDE: input controls
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")

            prompt = gr.Textbox(
                label="Prompt",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )

            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value=DEFAULT_NEGATIVE_PROMPT,
                placeholder="List what you specifically don't want...",
                lines=2,
            )

            duration = gr.Slider(
                label="Duration (seconds)",
                minimum=1.0, maximum=30.0, value=10.0, step=0.1,
            )

            with gr.Row():
                seed = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=MAX_SEED)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)

            with gr.Row():
                high_res = gr.Checkbox(label="High Resolution", value=True)
                enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)

            with gr.Row():
                width = gr.Number(label="Width", value=1536, precision=0)
                height = gr.Number(label="Height", value=1024, precision=0)

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

            with gr.Accordion("Advanced Settings", open=False):
                gr.Markdown("### Video Guidance Parameters")

                with gr.Row():
                    video_cfg_scale = gr.Slider(
                        label="Video CFG Scale", minimum=1.0, maximum=10.0,
                        value=1.0, step=0.1,  # matches generate_video's default
                    )
                    video_stg_scale = gr.Slider(
                        label="Video STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
                    )

                with gr.Row():
                    video_rescale_scale = gr.Slider(
                        label="Video Rescale", minimum=0.0, maximum=2.0, value=0.45, step=0.1
                    )
                    video_a2v_scale = gr.Slider(
                        label="A2V Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
                    )

                gr.Markdown("### Audio Guidance Parameters")

                with gr.Row():
                    audio_cfg_scale = gr.Slider(
                        label="Audio CFG Scale", minimum=1.0, maximum=15.0,
                        value=1.0, step=0.1,  # matches generate_video's default
                    )
                    audio_stg_scale = gr.Slider(
                        label="Audio STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
                    )

                with gr.Row():
                    audio_rescale_scale = gr.Slider(
                        label="Audio Rescale", minimum=0.0, maximum=2.0, value=1.0, step=0.1
                    )
                    audio_v2a_scale = gr.Slider(
                        label="V2A Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
                    )
                with gr.Row():
                    input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")

        # RIGHT SIDE: output and LoRA controls
        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)

            gpu_duration = gr.Slider(
                label="ZeroGPU duration (seconds)",
                minimum=30.0, maximum=240.0, value=90.0, step=1.0,
                info="Increase for longer videos, higher resolution, or LoRA usage",
            )

            gr.Markdown("### LoRA Adapter Strengths")
            gr.Markdown("Set a slider to 0 to disable that adapter, then click 'Prepare / Load LoRA Cache'.")

            with gr.Row():
                pose_strength = gr.Slider(label="Anthro Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                gr.Markdown("")  # spacer for alignment

            with gr.Row():
                general_strength = gr.Slider(label="Reasoning Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                motion_strength = gr.Slider(label="Anthro Posing", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                dreamlay_strength = gr.Slider(label="Dreamlay", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                mself_strength = gr.Slider(label="Mself", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                dramatic_strength = gr.Slider(label="Dramatic", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                fluid_strength = gr.Slider(label="Fluid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                liquid_strength = gr.Slider(label="Liquid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                demopose_strength = gr.Slider(label="Audio Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                voice_strength = gr.Slider(label="Voice Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                realism_strength = gr.Slider(label="Anthro Realism", minimum=0.0, maximum=2.0, value=0.0, step=0.01)

            with gr.Row():
                transition_strength = gr.Slider(label="POV", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                gr.Markdown("")  # spacer for alignment

            prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
            lora_status = gr.Textbox(
                label="LoRA Cache Status",
                value="No LoRA state prepared yet.",
                interactive=False,
            )

    # Event handlers
    first_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
    last_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
    high_res.change(fn=on_highres_toggle, inputs=[first_image, last_image, high_res], outputs=[width, height])

    prepare_lora_btn.click(
        fn=prepare_lora_cache,
        inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength,
                mself_strength, dramatic_strength, fluid_strength, liquid_strength,
                demopose_strength, voice_strength, realism_strength, transition_strength],
        outputs=[lora_status],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, negative_prompt, duration, gpu_duration,
            enhance_prompt, seed, randomize_seed, height, width,
            video_cfg_scale, video_stg_scale, video_rescale_scale, video_a2v_scale,
            audio_cfg_scale, audio_stg_scale, audio_rescale_scale, audio_v2a_scale,
            pose_strength, general_strength, motion_strength,
            dreamlay_strength, mself_strength, dramatic_strength, fluid_strength,
            liquid_strength, demopose_strength, voice_strength, realism_strength,
            transition_strength,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.queue().launch(mcp_server=False)